# Introduction

This assignment is based on this 2D object detection tutorial, which uses PyTorch to implement the SSD network to detect objects in images from the VOC dataset: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Object-Detection
```python
!pip uninstall torch torchvision torchtext torchaudio -y
!pip install torch==1.12+cu116 torchvision==0.13+cu116 --extra-index-url https://download.pytorch.org/whl/cu116
```

```python
RESET_DATA = False
```

# Download dataset and create json files

Only the mount portion has to be run if you already have the dataset downloaded and the json files.
First we mount our Google Drive.
```python
if RESET_DATA:
    from google.colab import drive
    drive.mount('/content/gdrive')
    # Go to your assignment directory
    %cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/
```

Next, download the VOC 2007 dataset. This takes about 6.2 minutes.
```python
if RESET_DATA:
    import requests
    import tarfile
    import io
    import time

    def download_and_unzip(url, path):
        dl_start = time.time()
        r = requests.get(url)
        dl_end = time.time()
        print("download time elapsed:", dl_end - dl_start)
        tar = tarfile.TarFile(fileobj=io.BytesIO(r.content))
        # extract the contents of VOC2007
        extract_start = time.time()
        subdir_and_files = [
            tarinfo for tarinfo in tar.getmembers()
            if tarinfo.name.startswith("VOCdevkit/VOC2007/")
        ]
        tar.extractall(path=path, members=subdir_and_files)
        extract_end = time.time()
        print("extract time elapsed:", extract_end - extract_start)

    # Go to your assignment directory
    %cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/

    start = time.time()
    download_and_unzip(
        "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar",
        "/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4"
    )
    download_and_unzip(
        "http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar",
        "/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4"
    )
    end = time.time()
    print("total time elapsed:", end - start)
```

Sync the data to your Google Drive. This should take about 33 minutes. You must restart the runtime after this by clicking Runtime -> Restart runtime.
```python
if RESET_DATA:
    start = time.time()
    drive.flush_and_unmount()
    end = time.time()
    print("total time elapsed:", end - start)
```

Check that the data is downloaded and that you have the json files. This also remounts the Google Drive.
```python
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)

# Go to your assignment directory
%cd /content/gdrive/MyDrive/'Colab Notebooks'/ece495_assignment4/

# Check location
!ls

## You should have this output:
# /content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4
# ece495_assignment4.ipynb  utils.py  VOCdevkit
# You should also have the json files
# and also the checkpoint if you have already trained the model
```

This code does not have to be run; the files it creates are provided with the assignment. It creates the json files label_map.json, TRAIN_images.json, TRAIN_objects.json, TEST_images.json, and TEST_objects.json: the image paths, the ground-truth object information, and the label-to-number mapping. This should take about 45 minutes if the data has not been cached.
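For orientation, each entry in TRAIN_objects.json pairs with one path in TRAIN_images.json. Judging by the keys the dataset class below reads, an entry looks roughly like this (the values here are invented for illustration):

```python
# Illustrative entry only; keys match what PascalVOCDataset reads
example_object = {
    "boxes": [[48, 240, 195, 371], [8, 12, 352, 498]],  # xmin, ymin, xmax, ymax per object
    "labels": [12, 15],        # integer class ids from label_map.json
    "difficulties": [0, 0],    # 1 if the annotation is marked 'difficult'
}
```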
```python
if RESET_DATA and False:
    from utils import create_data_lists
    import time

    start = time.time()
    create_data_lists(voc07_path='/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4/VOCdevkit/VOC2007',
                      voc12_path=None,  # Removed VOC 2012 to reduce the data size requirement of this assignment
                      output_folder='./')
    end = time.time()
    print("time elapsed:", end - start)
```

# Create the VOC Dataset loader

Next, the Dataset loader for VOC is implemented.
```python
import torch
from torch.utils.data import Dataset
import json
import os
from PIL import Image
from utils import transform


class PascalVOCDataset(Dataset):
    """
    A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
    """

    def __init__(self, data_folder, split, keep_difficult=False):
        """
        :param data_folder: folder where data files are stored
        :param split: split, one of 'TRAIN' or 'TEST'
        :param keep_difficult: keep or discard objects that are considered difficult to detect?
        """
        self.split = split.upper()

        assert self.split in {'TRAIN', 'TEST'}

        self.data_folder = data_folder
        self.keep_difficult = keep_difficult

        # Read data files
        with open(os.path.join(data_folder, self.split + '_images.json'), 'r') as j:
            self.images = json.load(j)
        with open(os.path.join(data_folder, self.split + '_objects.json'), 'r') as j:
            self.objects = json.load(j)

        assert len(self.images) == len(self.objects)

    def __getitem__(self, i):
        # Read image
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')

        # Read objects in this image (bounding boxes, labels, difficulties)
        objects = self.objects[i]
        boxes = torch.FloatTensor(objects['boxes'])  # (n_objects, 4)
        labels = torch.LongTensor(objects['labels'])  # (n_objects)
        difficulties = torch.ByteTensor(objects['difficulties'])  # (n_objects)

        # Discard difficult objects, if desired
        if not self.keep_difficult:
            boxes = boxes[1 - difficulties]
            labels = labels[1 - difficulties]
            difficulties = difficulties[1 - difficulties]

        # Apply transformations
        image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)

        return image, boxes, labels, difficulties

    def __len__(self):
        return len(self.images)

    def collate_fn(self, batch):
        """
        Since each image may have a different number of objects, we need a collate function
        (to be passed to the DataLoader).

        This describes how to combine these tensors of different sizes. We use lists.

        Note: this need not be defined in this Class, can be standalone.

        :param batch: an iterable of N sets from __getitem__()
        :return: a tensor of images, lists of varying-size tensors of bounding boxes, labels, and difficulties
        """
        images = list()
        boxes = list()
        labels = list()
        difficulties = list()

        for b in batch:
            images.append(b[0])
            boxes.append(b[1])
            labels.append(b[2])
            difficulties.append(b[3])

        images = torch.stack(images, dim=0)

        return images, boxes, labels, difficulties  # tensor (N, 3, 300, 300), 3 lists of N tensors each
```
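A minimal sketch of how this dataset and its custom `collate_fn` can be wired into a DataLoader (the data folder and batch size here are illustrative; the real loaders are built in the training code later):

```python
from torch.utils.data import DataLoader

# Hypothetical usage; assumes the TRAIN_*.json files live in the current folder
dataset = PascalVOCDataset(data_folder='./', split='TRAIN', keep_difficult=True)
loader = DataLoader(dataset, batch_size=4, shuffle=True,
                    collate_fn=dataset.collate_fn)  # boxes/labels come back as lists, not stacked tensors

images, boxes, labels, difficulties = next(iter(loader))
print(images.shape)                 # torch.Size([4, 3, 300, 300])
print(len(boxes), boxes[0].shape)   # 4 per-image tensors, each (n_objects, 4)
```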
## Base layers

First we create the base (encoder) part of the network. You must fill in the ResNet code.
```python
from torch import nn
from utils import *
import torch.nn.functional as F
from math import sqrt
from itertools import product as product
import torchvision

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class VGGBase(nn.Module):
    """
    VGG base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super(VGGBase, self).__init__()

        # Standard convolutional layers in VGG16
        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)  # stride = 1, by default
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)  # ceiling (not floor) here for even dims

        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)  # retains size because stride is 1 (and padding)

        # Replacements for FC6 and FC7 in VGG16
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)  # atrous convolution
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)

        # Load pretrained layers
        self.load_pretrained_layers()

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: lower-level feature maps conv4_3 and conv7
        """
        out = F.relu(self.conv1_1(image))  # (N, 64, 300, 300)
        out = F.relu(self.conv1_2(out))  # (N, 64, 300, 300)
        out = self.pool1(out)  # (N, 64, 150, 150)

        out = F.relu(self.conv2_1(out))  # (N, 128, 150, 150)
        out = F.relu(self.conv2_2(out))  # (N, 128, 150, 150)
        out = self.pool2(out)  # (N, 128, 75, 75)

        out = F.relu(self.conv3_1(out))  # (N, 256, 75, 75)
        out = F.relu(self.conv3_2(out))  # (N, 256, 75, 75)
        out = F.relu(self.conv3_3(out))  # (N, 256, 75, 75)
        out = self.pool3(out)  # (N, 256, 38, 38), it would have been 37 if not for ceil_mode = True

        out = F.relu(self.conv4_1(out))  # (N, 512, 38, 38)
        out = F.relu(self.conv4_2(out))  # (N, 512, 38, 38)
        out = F.relu(self.conv4_3(out))  # (N, 512, 38, 38)
        conv4_3_feats = out  # (N, 512, 38, 38)
        out = self.pool4(out)  # (N, 512, 19, 19)

        out = F.relu(self.conv5_1(out))  # (N, 512, 19, 19)
        out = F.relu(self.conv5_2(out))  # (N, 512, 19, 19)
        out = F.relu(self.conv5_3(out))  # (N, 512, 19, 19)
        out = self.pool5(out)  # (N, 512, 19, 19), pool5 does not reduce dimensions

        out = F.relu(self.conv6(out))  # (N, 1024, 19, 19)

        conv7_feats = F.relu(self.conv7(out))  # (N, 1024, 19, 19)

        # Lower-level feature maps
        return conv4_3_feats, conv7_feats

    def load_pretrained_layers(self):
        """
        As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
        There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
        We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
        Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
        """
        # Current state of base
        state_dict = self.state_dict()
        param_names = list(state_dict.keys())

        # Pretrained VGG base
        pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()
        pretrained_param_names = list(pretrained_state_dict.keys())

        # Transfer conv. parameters from pretrained model to current model
        for i, param in enumerate(param_names[:-4]):  # excluding conv6 and conv7 parameters
            state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]

        # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7
        # fc6
        conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7)  # (4096, 512, 7, 7)
        conv_fc6_bias = pretrained_state_dict['classifier.0.bias']  # (4096)
        state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3])  # (1024, 512, 3, 3)
        state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])  # (1024)
        # fc7
        conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1)  # (4096, 4096, 1, 1)
        conv_fc7_bias = pretrained_state_dict['classifier.3.bias']  # (4096)
        state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None])  # (1024, 1024, 1, 1)
        state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])  # (1024)

        # Note: an FC layer of size (K) operating on a flattened version (C*H*W) of a 2D image of size (C, H, W)...
        # ...is equivalent to a convolutional layer with kernel size (H, W), input channels C, output channels K...
        # ...operating on the 2D image of size (C, H, W) without padding
        self.load_state_dict(state_dict)
        # print("\nLoaded base model.\n")


class ResNetBase(nn.Module):
    """
    ResNet base convolutions to produce lower-level feature maps.
    """

    def __init__(self):
        super(ResNetBase, self).__init__()
        # TODO: Load pretrained resnet model
        self.resnet = torchvision.models.resnet50(pretrained=True)

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: lower-level feature maps
        """
        # TODO: Add your code
        # Stem
        x0 = self.resnet.conv1(image)
        x0 = self.resnet.bn1(x0)
        x0 = self.resnet.relu(x0)
        x0 = self.resnet.maxpool(x0)  # (N, 64, 75, 75)

        x1 = self.resnet.layer1(x0)  # (N, 256, 75, 75)
        x2 = self.resnet.layer2(x1)  # (N, 512, 38, 38)
        x3 = self.resnet.layer3(x2)  # (N, 1024, 19, 19)
        # x4 = self.resnet.layer4(x3)  # (N, 2048, 10, 10), not needed for the two returned feature maps

        conv_512_feats, conv_1024_feats = x2, x3

        # Lower-level feature maps
        return conv_512_feats, conv_1024_feats
```

```python
# # quick test of my resnetbase
# untrained_net = ResNetBase().to(device)
# untrained_net.eval()
# import torch
# print(torch.__version__)
# test1 = torch.rand((1, 3, 300, 300)).to(device)
# f1, f2 = untrained_net(test1)
# print(f1.shape, f2.shape)
```
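For context, the `decimate` helper referenced in `load_pretrained_layers` subsamples a tensor by keeping every m-th slice along each dimension; this is a minimal sketch consistent with how it is called above (the authoritative version lives in utils.py):

```python
import torch

def decimate(tensor, m):
    """Keep every m[d]-th slice along each dimension d; None keeps that dimension whole."""
    assert tensor.dim() == len(m)
    for d in range(tensor.dim()):
        if m[d] is not None:
            tensor = tensor.index_select(
                dim=d, index=torch.arange(start=0, end=tensor.size(d), step=m[d]).long())
    return tensor

# e.g. the (4096, 512, 7, 7) fc6 weight becomes (1024, 512, 3, 3) with m=[4, None, 3, 3]
```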
## Auxiliary layers

The base layers created the low-level feature maps with 512 and 1024 channels. Now the higher-level feature maps are created, with 512, 256, 256, and 256 channels.

```python
class AuxiliaryConvolutions(nn.Module):
    """
    Additional convolutions to produce higher-level feature maps.
    """

    def __init__(self):
        super(AuxiliaryConvolutions, self).__init__()

        # Auxiliary/additional convolutions on top of the VGG base
        self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)  # stride = 1, by default
        self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)  # dim. reduction because stride > 1

        self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
        self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)  # dim. reduction because stride > 1

        self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # dim. reduction because padding = 0

        self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
        self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)  # dim. reduction because padding = 0

        # Initialize convolutions' parameters
        self.init_conv2d()

    def init_conv2d(self):
        """
        Initialize convolution parameters.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)

    def forward(self, conv7_feats):
        """
        Forward propagation.

        :param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
        :return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
        """
        out = F.relu(self.conv8_1(conv7_feats))  # (N, 256, 19, 19)
        out = F.relu(self.conv8_2(out))  # (N, 512, 10, 10)
        conv8_2_feats = out  # (N, 512, 10, 10)

        out = F.relu(self.conv9_1(out))  # (N, 128, 10, 10)
        out = F.relu(self.conv9_2(out))  # (N, 256, 5, 5)
        conv9_2_feats = out  # (N, 256, 5, 5)

        out = F.relu(self.conv10_1(out))  # (N, 128, 5, 5)
        out = F.relu(self.conv10_2(out))  # (N, 256, 3, 3)
        conv10_2_feats = out  # (N, 256, 3, 3)

        out = F.relu(self.conv11_1(out))  # (N, 128, 3, 3)
        conv11_2_feats = F.relu(self.conv11_2(out))  # (N, 256, 1, 1)

        # Higher-level feature maps
        return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
```

## Prediction layers

At this point we have our 6 feature maps.
The low-level feature maps: (N, 512, 38, 38) and (N, 1024, 19, 19).
And the high-level feature maps: (N, 512, 10, 10), (N, 256, 5, 5), (N, 256, 3, 3), (N, 256, 1, 1).
Each prior box requires a classification output with one score per class, plus the 4 regressed box-location values. These convolutions are created in the init function.

In the forward pass, all the convolutions are applied to their respective input feature maps. The resulting tensors are then reshaped and concatenated so that the classification output has shape (N, 8732, n_classes) and the box output has shape (N, 8732, 4). This format is easier to work with when the network output is passed to the loss function during training, or through NMS during testing. A short sketch of this reshaping pattern follows.
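A minimal sketch of the permute/view pattern used below, for one feature map (the shapes here are illustrative):

```python
import torch

N, n_classes = 2, 21
c_conv4_3 = torch.randn(N, 4 * n_classes, 38, 38)  # raw conv output: (N, 4 * n_classes, 38, 38)

# Move channels last so that, after flattening, consecutive rows correspond
# to the 4 priors at one spatial location, matching the prior-box ordering
c = c_conv4_3.permute(0, 2, 3, 1).contiguous()  # (N, 38, 38, 4 * n_classes)
c = c.view(N, -1, n_classes)                    # (N, 5776, n_classes); 38 * 38 * 4 = 5776
print(c.shape)  # torch.Size([2, 5776, 21])
```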
```python
class PredictionConvolutions(nn.Module):
    """
    Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.

    The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
    See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.

    The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
    A high score for 'background' = no object.
    """

    def __init__(self, n_classes):
        """
        :param n_classes: number of different types of objects
        """
        super(PredictionConvolutions, self).__init__()

        self.n_classes = n_classes

        # Number of prior-boxes we are considering per position in each feature map
        n_boxes = {'conv4_3': 4,
                   'conv7': 6,
                   'conv8_2': 6,
                   'conv9_2': 6,
                   'conv10_2': 4,
                   'conv11_2': 4}
        # 4 prior-boxes implies we use 4 different aspect ratios, etc.

        # Localization prediction convolutions (predict offsets w.r.t prior-boxes)
        self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4, kernel_size=3, padding=1)
        self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=3, padding=1)
        self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4, kernel_size=3, padding=1)
        self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4, kernel_size=3, padding=1)

        # Class prediction convolutions (predict classes in localization boxes)
        self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes, kernel_size=3, padding=1)
        self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes, kernel_size=3, padding=1)

        # Initialize convolutions' parameters
        self.init_conv2d()

    def init_conv2d(self):
        """
        Initialize convolution parameters.
        """
        for c in self.children():
            if isinstance(c, nn.Conv2d):
                nn.init.xavier_uniform_(c.weight)
                nn.init.constant_(c.bias, 0.)

    def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats):
        """
        Forward propagation.

        :param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
        :param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
        :param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
        :param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
        :param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
        :param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
        :return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
        """
        batch_size = conv4_3_feats.size(0)

        # Predict localization boxes' bounds (as offsets w.r.t prior-boxes)
        l_conv4_3 = self.loc_conv4_3(conv4_3_feats)  # (N, 16, 38, 38)
        l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()  # (N, 38, 38, 16), to match prior-box order (after .view())
        # (.contiguous() ensures it is stored in a contiguous chunk of memory, needed for .view() below)
        l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)  # (N, 5776, 4), there are a total of 5776 boxes on this feature map

        l_conv7 = self.loc_conv7(conv7_feats)  # (N, 24, 19, 19)
        l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()  # (N, 19, 19, 24)
        l_conv7 = l_conv7.view(batch_size, -1, 4)  # (N, 2166, 4), there are a total of 2166 boxes on this feature map

        l_conv8_2 = self.loc_conv8_2(conv8_2_feats)  # (N, 24, 10, 10)
        l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()  # (N, 10, 10, 24)
        l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)  # (N, 600, 4)

        l_conv9_2 = self.loc_conv9_2(conv9_2_feats)  # (N, 24, 5, 5)
        l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()  # (N, 5, 5, 24)
        l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)  # (N, 150, 4)

        l_conv10_2 = self.loc_conv10_2(conv10_2_feats)  # (N, 16, 3, 3)
        l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()  # (N, 3, 3, 16)
        l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)  # (N, 36, 4)

        l_conv11_2 = self.loc_conv11_2(conv11_2_feats)  # (N, 16, 1, 1)
        l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()  # (N, 1, 1, 16)
        l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)  # (N, 4, 4)

        # Predict classes in localization boxes
        c_conv4_3 = self.cl_conv4_3(conv4_3_feats)  # (N, 4 * n_classes, 38, 38)
        c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous()  # (N, 38, 38, 4 * n_classes), to match prior-box order (after .view())
        c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes)  # (N, 5776, n_classes), there are a total of 5776 boxes on this feature map

        c_conv7 = self.cl_conv7(conv7_feats)  # (N, 6 * n_classes, 19, 19)
        c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()  # (N, 19, 19, 6 * n_classes)
        c_conv7 = c_conv7.view(batch_size, -1, self.n_classes)  # (N, 2166, n_classes), there are a total of 2166 boxes on this feature map

        c_conv8_2 = self.cl_conv8_2(conv8_2_feats)  # (N, 6 * n_classes, 10, 10)
        c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()  # (N, 10, 10, 6 * n_classes)
        c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)  # (N, 600, n_classes)

        c_conv9_2 = self.cl_conv9_2(conv9_2_feats)  # (N, 6 * n_classes, 5, 5)
        c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()  # (N, 5, 5, 6 * n_classes)
        c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)  # (N, 150, n_classes)

        c_conv10_2 = self.cl_conv10_2(conv10_2_feats)  # (N, 4 * n_classes, 3, 3)
        c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()  # (N, 3, 3, 4 * n_classes)
        c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)  # (N, 36, n_classes)

        c_conv11_2 = self.cl_conv11_2(conv11_2_feats)  # (N, 4 * n_classes, 1, 1)
        c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()  # (N, 1, 1, 4 * n_classes)
        c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)  # (N, 4, n_classes)

        # A total of 8732 boxes
        # Concatenate in this specific order (i.e. must match the order of the prior-boxes)
        locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2, l_conv10_2, l_conv11_2], dim=1)  # (N, 8732, 4)
        classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2, c_conv9_2, c_conv10_2, c_conv11_2],
                                   dim=1)  # (N, 8732, n_classes)

        return locs, classes_scores
```

## The SSD300 Model

init - Defines all network layers and creates the prior boxes
create_prior_boxes - Creates the 8732 prior boxes across the 6 feature maps (the count is verified in the sketch after this list)
forward - Sends the input through the three network components, then returns the predicted locations and classification scores
detect_objects - After a forward pass, the predictions can be sent to this function during testing to perform NMS for the final output
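As a quick check that the six feature maps really yield 8,732 priors (using the per-location prior counts from the `n_boxes` table in `PredictionConvolutions`):

```python
fmap_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2': 5, 'conv10_2': 3, 'conv11_2': 1}
n_boxes   = {'conv4_3': 4,  'conv7': 6,  'conv8_2': 6,  'conv9_2': 6, 'conv10_2': 4, 'conv11_2': 4}

total = sum(fmap_dims[f] ** 2 * n_boxes[f] for f in fmap_dims)
print(total)  # 5776 + 2166 + 600 + 150 + 36 + 4 = 8732
```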
### Answer the following questions after reading the NMS code and comparing it to the version in the lecture notes / tutorial.

1. What variables within the batch_size for loop represent "D" and "$\bar{B}$"?

D := `all_images_boxes` \
$\bar{B}$ := `image_boxes`
2. The NMS pseudocode is written with operations such as union and set subtraction. Within the NMS Python code, how are boxes selected in order to be added to the "D" output?

`score_above_min_score` is created by comparing the predicted class scores for each bounding box with the minimum score; \
this mask is used to create `class_decoded_locs`, which is used to compute the IoU `overlap` of all the qualifying predictions: \
`overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs)  # (n_qualified, n_qualified)` \
then, iteratively, predicted boxes are suppressed whose overlaps (with each other box) are greater than a maximum overlap; \
then, using `class_decoded_locs[1 - suppress]`, the non-suppressed predictions are kept and stored in "D".
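To connect the set-based pseudocode to the loop in `detect_objects` below, here is a compact standalone sketch of the same greedy per-class NMS, written from scratch for illustration (it uses a boolean mask instead of the notebook's uint8 tensor, and computes IoU inline in place of `find_jaccard_overlap`):

```python
import torch

def nms(boxes, scores, max_overlap=0.45):
    """boxes: (n, 4) in xyxy form, already filtered by min_score; returns indices of kept boxes.
    An illustrative sketch, not the notebook's exact implementation."""
    scores, order = scores.sort(descending=True)
    boxes = boxes[order]

    # Pairwise IoU between all boxes
    x1 = torch.max(boxes[:, None, 0], boxes[None, :, 0])
    y1 = torch.max(boxes[:, None, 1], boxes[None, :, 1])
    x2 = torch.min(boxes[:, None, 2], boxes[None, :, 2])
    y2 = torch.min(boxes[:, None, 3], boxes[None, :, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    iou = inter / (areas[:, None] + areas[None, :] - inter)

    suppress = torch.zeros(len(boxes), dtype=torch.bool)
    for b in range(len(boxes)):          # boxes are visited in decreasing score order
        if suppress[b]:
            continue
        suppress |= iou[b] > max_overlap  # suppress everything this box overlaps too much
        suppress[b] = False               # but never suppress the box itself (IoU with itself is 1)
    return order[~suppress]
```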
```python
class SSD300(nn.Module):
    """
    The SSD300 network - encapsulates the base network, auxiliary, and prediction convolutions.
    """

    def __init__(self, n_classes, base_type):
        super(SSD300, self).__init__()

        self.n_classes = n_classes

        if base_type == 'VGG':
            self.base = VGGBase()
        elif base_type == 'ResNet':
            self.base = ResNetBase()
        else:
            raise NotImplementedError
        self.aux_convs = AuxiliaryConvolutions()
        self.pred_convs = PredictionConvolutions(n_classes)

        # Since lower level features (conv4_3_feats) have considerably larger scales, we take the L2 norm and rescale
        # Rescale factor is initially set at 20, but is learned for each channel during back-prop
        self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))  # there are 512 channels in conv4_3_feats
        nn.init.constant_(self.rescale_factors, 20)

        # Prior boxes
        self.priors_cxcy = self.create_prior_boxes()

    def forward(self, image):
        """
        Forward propagation.

        :param image: images, a tensor of dimensions (N, 3, 300, 300)
        :return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
        """
        # Run base network convolutions (lower level feature map generators)
        conv4_3_feats, conv7_feats = self.base(image)  # (N, 512, 38, 38), (N, 1024, 19, 19)

        # Rescale conv4_3 after L2 norm
        norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt()  # (N, 1, 38, 38)
        conv4_3_feats = conv4_3_feats / norm  # (N, 512, 38, 38)
        conv4_3_feats = conv4_3_feats * self.rescale_factors  # (N, 512, 38, 38)
        # (PyTorch autobroadcasts singleton dimensions during arithmetic)

        # Run auxiliary convolutions (higher level feature map generators)
        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = \
            self.aux_convs(conv7_feats)  # (N, 512, 10, 10), (N, 256, 5, 5), (N, 256, 3, 3), (N, 256, 1, 1)

        # Run prediction convolutions (predict offsets w.r.t prior-boxes and classes in each resulting localization box)
        locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats, conv8_2_feats, conv9_2_feats,
                                               conv10_2_feats, conv11_2_feats)  # (N, 8732, 4), (N, 8732, n_classes)

        return locs, classes_scores

    def create_prior_boxes(self):
        """
        Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.

        :return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
        """
        fmap_dims = {'conv4_3': 38,
                     'conv7': 19,
                     'conv8_2': 10,
                     'conv9_2': 5,
                     'conv10_2': 3,
                     'conv11_2': 1}

        obj_scales = {'conv4_3': 0.1,
                      'conv7': 0.2,
                      'conv8_2': 0.375,
                      'conv9_2': 0.55,
                      'conv10_2': 0.725,
                      'conv11_2': 0.9}

        aspect_ratios = {'conv4_3': [1., 2., 0.5],
                         'conv7': [1., 2., 3., 0.5, .333],
                         'conv8_2': [1., 2., 3., 0.5, .333],
                         'conv9_2': [1., 2., 3., 0.5, .333],
                         'conv10_2': [1., 2., 0.5],
                         'conv11_2': [1., 2., 0.5]}

        fmaps = list(fmap_dims.keys())

        prior_boxes = []

        for k, fmap in enumerate(fmaps):
            for i in range(fmap_dims[fmap]):
                for j in range(fmap_dims[fmap]):
                    cx = (j + 0.5) / fmap_dims[fmap]
                    cy = (i + 0.5) / fmap_dims[fmap]

                    for ratio in aspect_ratios[fmap]:
                        prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])

                        # For an aspect ratio of 1, use an additional prior whose scale is the geometric mean of the
                        # scale of the current feature map and the scale of the next feature map
                        if ratio == 1.:
                            try:
                                additional_scale = sqrt(obj_scales[fmap] * obj_scales[fmaps[k + 1]])
                            # For the last feature map, there is no "next" feature map
                            except IndexError:
                                additional_scale = 1.
                            prior_boxes.append([cx, cy, additional_scale, additional_scale])

        prior_boxes = torch.FloatTensor(prior_boxes).to(device)  # (8732, 4)
        prior_boxes.clamp_(0, 1)  # (8732, 4)

        return prior_boxes

    def detect_objects(self, predicted_locs, predicted_scores, min_score, max_overlap, top_k):
        """
        Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.

        For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.

        :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
        :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
        :param min_score: minimum threshold for a box to be considered a match for a certain class
        :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
        :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
        :return: detections (boxes, labels, and scores), lists of length batch_size
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        predicted_scores = F.softmax(predicted_scores, dim=2)  # (N, 8732, n_classes)

        # Lists to store final predicted boxes, labels, and scores for all images
        all_images_boxes = list()
        all_images_labels = list()
        all_images_scores = list()

        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)

        for i in range(batch_size):
            # Decode object coordinates from the form we regressed predicted boxes to
            decoded_locs = cxcy_to_xy(
                gcxgcy_to_cxcy(predicted_locs[i], self.priors_cxcy))  # (8732, 4), these are fractional pt. coordinates

            # Lists to store boxes and scores for this image
            image_boxes = list()
            image_labels = list()
            image_scores = list()

            max_scores, best_label = predicted_scores[i].max(dim=1)  # (8732)

            # Check for each class
            for c in range(1, self.n_classes):
                # Keep only predicted boxes and scores where scores for this class are above the minimum score
                class_scores = predicted_scores[i][:, c]  # (8732)
                score_above_min_score = class_scores > min_score  # torch.uint8 (byte) tensor, for indexing
                n_above_min_score = score_above_min_score.sum().item()
                if n_above_min_score == 0:
                    continue
                class_scores = class_scores[score_above_min_score]  # (n_qualified), n_qualified <= 8732
                class_decoded_locs = decoded_locs[score_above_min_score]  # (n_qualified, 4)

                # Sort predicted boxes and scores by scores
                class_scores, sort_ind = class_scores.sort(dim=0, descending=True)  # (n_qualified), (n_qualified)
                class_decoded_locs = class_decoded_locs[sort_ind]  # (n_qualified, 4)

                # Find the overlap between predicted boxes
                overlap = find_jaccard_overlap(class_decoded_locs, class_decoded_locs)  # (n_qualified, n_qualified)

                # Non-Maximum Suppression (NMS)

                # A torch.uint8 (byte) tensor to keep track of which predicted boxes to suppress
                # 1 implies suppress, 0 implies don't suppress
                suppress = torch.zeros((n_above_min_score), dtype=torch.uint8).to(device)  # (n_qualified)

                # Consider each box in order of decreasing scores
                for box in range(class_decoded_locs.size(0)):
                    # If this box is already marked for suppression
                    if suppress[box] == 1:
                        continue

                    # Suppress boxes whose overlaps (with this box) are greater than maximum overlap
                    # Find such boxes and update suppress indices
                    suppress = torch.max(suppress, overlap[box] > max_overlap)
                    # The max operation retains previously suppressed boxes, like an 'OR' operation

                    # Don't suppress this box, even though it has an overlap of 1 with itself
                    suppress[box] = 0

                # Store only unsuppressed boxes for this class
                image_boxes.append(class_decoded_locs[1 - suppress])
                image_labels.append(torch.LongTensor((1 - suppress).sum().item() * [c]).to(device))
                image_scores.append(class_scores[1 - suppress])

            # If no object in any class is found, store a placeholder for 'background'
            if len(image_boxes) == 0:
                image_boxes.append(torch.FloatTensor([[0., 0., 1., 1.]]).to(device))
                image_labels.append(torch.LongTensor([0]).to(device))
                image_scores.append(torch.FloatTensor([0.]).to(device))

            # Concatenate into single tensors
            image_boxes = torch.cat(image_boxes, dim=0)  # (n_objects, 4)
            image_labels = torch.cat(image_labels, dim=0)  # (n_objects)
            image_scores = torch.cat(image_scores, dim=0)  # (n_objects)
            n_objects = image_scores.size(0)

            # Keep only the top k objects
            if n_objects > top_k:
                image_scores, sort_ind = image_scores.sort(dim=0, descending=True)
                image_scores = image_scores[:top_k]  # (top_k)
                image_boxes = image_boxes[sort_ind][:top_k]  # (top_k, 4)
                image_labels = image_labels[sort_ind][:top_k]  # (top_k)

            # Append to lists that store predicted boxes and scores for all images
            all_images_boxes.append(image_boxes)
            all_images_labels.append(image_labels)
            all_images_scores.append(image_scores)

        return all_images_boxes, all_images_labels, all_images_scores  # lists of length batch_size
```

## The MultiBoxLoss

During training, the output from the SSD forward pass is sent to the criterion (set to this function) in order to calculate the loss.
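For reference, `cxcy_to_gcxgcy` (and its inverse, used by `detect_objects`) implement the standard SSD offset encoding. A minimal sketch of what utils.py provides, assuming the usual empirical variances of 0.1 and 0.2 (expressed as the division by 10 and multiplication by 5):

```python
import torch

def cxcy_to_gcxgcy(cxcy, priors_cxcy):
    """Encode center-size boxes (c_x, c_y, w, h) as offsets w.r.t. prior boxes (a sketch)."""
    return torch.cat([(cxcy[:, :2] - priors_cxcy[:, :2]) / (priors_cxcy[:, 2:] / 10),  # g_cx, g_cy
                      torch.log(cxcy[:, 2:] / priors_cxcy[:, 2:]) * 5], dim=1)          # g_w, g_h

def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
    """Invert the encoding above; detect_objects applies this before NMS (a sketch)."""
    return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy[:, :2],
                      torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], dim=1)
```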
```python
class MultiBoxLoss(nn.Module):
    """
    The MultiBox loss, a loss function for object detection.

    This is a combination of:
    (1) a localization loss for the predicted locations of the boxes, and
    (2) a confidence loss for the predicted class scores.
    """

    def __init__(self, priors_cxcy, threshold=0.5, neg_pos_ratio=3, alpha=1.):
        super(MultiBoxLoss, self).__init__()
        self.priors_cxcy = priors_cxcy
        self.priors_xy = cxcy_to_xy(priors_cxcy)
        self.threshold = threshold
        self.neg_pos_ratio = neg_pos_ratio
        self.alpha = alpha

        self.smooth_l1 = nn.L1Loss()
        self.cross_entropy = nn.CrossEntropyLoss(reduction='none')  # 'reduce=False' is deprecated; 'none' is equivalent

    def forward(self, predicted_locs, predicted_scores, boxes, labels):
        """
        Forward propagation.

        :param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
        :param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
        :param boxes: true object bounding boxes in boundary coordinates, a list of N tensors
        :param labels: true object labels, a list of N tensors
        :return: multibox loss, a scalar
        """
        batch_size = predicted_locs.size(0)
        n_priors = self.priors_cxcy.size(0)
        n_classes = predicted_scores.size(2)

        assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)

        true_locs = torch.zeros((batch_size, n_priors, 4), dtype=torch.float).to(device)  # (N, 8732, 4)
        true_classes = torch.zeros((batch_size, n_priors), dtype=torch.long).to(device)  # (N, 8732)

        # For each image
        for i in range(batch_size):
            n_objects = boxes[i].size(0)

            overlap = find_jaccard_overlap(boxes[i], self.priors_xy)  # (n_objects, 8732)

            # For each prior, find the object that has the maximum overlap
            overlap_for_each_prior, object_for_each_prior = overlap.max(dim=0)  # (8732)

            # We don't want a situation where an object is not represented in our positive (non-background) priors -
            # 1. An object might not be the best object for all priors, and is therefore not in object_for_each_prior.
            # 2. All priors with the object may be assigned as background based on the threshold (0.5).

            # To remedy this -
            # First, find the prior that has the maximum overlap for each object.
            _, prior_for_each_object = overlap.max(dim=1)  # (N_o)

            # Then, assign each object to the corresponding maximum-overlap-prior. (This fixes 1.)
            object_for_each_prior[prior_for_each_object] = torch.LongTensor(range(n_objects)).to(device)

            # To ensure these priors qualify, artificially give them an overlap of greater than 0.5. (This fixes 2.)
            overlap_for_each_prior[prior_for_each_object] = 1.

            # Labels for each prior
            label_for_each_prior = labels[i][object_for_each_prior]  # (8732)
            # Set priors whose overlaps with objects are less than the threshold to be background (no object)
            label_for_each_prior[overlap_for_each_prior < self.threshold] = 0  # (8732)

            # Store
            true_classes[i] = label_for_each_prior

            # Encode center-size object coordinates into the form we regressed predicted boxes to
            true_locs[i] = cxcy_to_gcxgcy(xy_to_cxcy(boxes[i][object_for_each_prior]), self.priors_cxcy)  # (8732, 4)

        # Identify priors that are positive (object/non-background)
        positive_priors = true_classes != 0  # (N, 8732)

        # LOCALIZATION LOSS
        # Localization loss is computed only over positive (non-background) priors
        loc_loss = self.smooth_l1(predicted_locs[positive_priors], true_locs[positive_priors])  # (), scalar

        # Note: indexing with a torch.uint8 (byte) tensor flattens the tensor when indexing is across multiple dimensions (N & 8732)
        # So, if predicted_locs has the shape (N, 8732, 4), predicted_locs[positive_priors] will have (total positives, 4)

        # CONFIDENCE LOSS
        # Confidence loss is computed over positive priors and the most difficult (hardest) negative priors in each image
        # That is, FOR EACH IMAGE,
        # we will take the hardest (neg_pos_ratio * n_positives) negative priors, i.e. where there is maximum loss
        # This is called Hard Negative Mining - it concentrates on hardest negatives in each image, and also minimizes pos/neg imbalance

        # Number of positive and hard-negative priors per image
        n_positives = positive_priors.sum(dim=1)  # (N)
        n_hard_negatives = self.neg_pos_ratio * n_positives  # (N)

        # First, find the loss for all priors
        conf_loss_all = self.cross_entropy(predicted_scores.view(-1, n_classes), true_classes.view(-1))  # (N * 8732)
        conf_loss_all = conf_loss_all.view(batch_size, n_priors)  # (N, 8732)

        # We already know which priors are positive
        conf_loss_pos = conf_loss_all[positive_priors]  # (sum(n_positives))

        # Next, find which priors are hard-negative
        # To do this, sort ONLY negative priors in each image in order of decreasing loss and take top n_hard_negatives
        conf_loss_neg = conf_loss_all.clone()  # (N, 8732)
        conf_loss_neg[positive_priors] = 0.  # (N, 8732), positive priors are ignored (never in top n_hard_negatives)
        conf_loss_neg, _ = conf_loss_neg.sort(dim=1, descending=True)  # (N, 8732), sorted by decreasing hardness
        hardness_ranks = torch.LongTensor(range(n_priors)).unsqueeze(0).expand_as(conf_loss_neg).to(device)  # (N, 8732)
        hard_negatives = hardness_ranks < n_hard_negatives.unsqueeze(1)  # (N, 8732)
        conf_loss_hard_neg = conf_loss_neg[hard_negatives]  # (sum(n_hard_negatives))

        # As in the paper, averaged over positive priors only, although computed over both positive and hard-negative priors
        conf_loss = (conf_loss_hard_neg.sum() + conf_loss_pos.sum()) / n_positives.sum().float()  # (), scalar

        # TOTAL LOSS
        return conf_loss + self.alpha * loc_loss
```

# Training

With the model implemented, it is time to train. Training should take about 2 hours and 9 minutes for 10 epochs, or about 1 hour and 5 minutes for only the VOC2007 dataset with 16 epochs.
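Before the full training code, a toy illustration of the hard-negative-mining ranking trick used inside MultiBoxLoss above: sort the (positives-zeroed) per-prior losses and keep the top `neg_pos_ratio * n_positives` per image. The numbers here are made up:

```python
import torch

conf_loss_neg = torch.tensor([[0.9, 0.1, 0.5, 0.7, 0.2],
                              [0.3, 0.8, 0.0, 0.4, 0.6]])  # toy per-prior losses, positives already zeroed
n_hard_negatives = torch.tensor([2, 1])                    # neg_pos_ratio * n_positives per image

sorted_loss, _ = conf_loss_neg.sort(dim=1, descending=True)
ranks = torch.arange(conf_loss_neg.size(1)).unsqueeze(0).expand_as(conf_loss_neg)
hard = ranks < n_hard_negatives.unsqueeze(1)               # mask of the hardest negatives per row
print(sorted_loss[hard])                                   # tensor([0.9000, 0.7000, 0.8000])
```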
```python
import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
# from model import SSD300, MultiBoxLoss
# from datasets import PascalVOCDataset
from utils import *

# TODO: Import a learning rate scheduler
import torch.optim.lr_scheduler as lr_scheduler

# Data parameters
data_folder = './'  # folder with data files
keep_difficult = True  # use objects considered difficult to detect?

# Model parameters
# Not too many here since the SSD300 has a very specific structure
n_classes = len(label_map)  # number of different types of objects
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Learning parameters
checkpoint = None  # path to model checkpoint, None if none
batch_size = 6  # batch size
iterations = 15000  # 120000 # number of iterations to train (DON'T CHANGE)
workers = 4  # number of workers for loading data in the DataLoader
print_freq = 200  # print training status every __ batches
momentum = 0.9  # momentum
weight_decay = 5e-4  # weight decay
grad_clip = None  # clip if gradients are exploding, which may happen at larger batch sizes (sometimes at 32)
                  # - you will recognize it by a sorting error in the MultiBox loss calculation

cudnn.benchmark = True


# Overwrite the checkpoint function in utils
def save_checkpoint(epoch, model, optimizer, base_type, scheduler):
    """
    Save model checkpoint.

    :param epoch: epoch number
    :param model: model
    :param optimizer: optimizer
    :param base_type: The base network type
    :param scheduler: learning rate scheduler, or None for the original decay scheme
    """
    state = {'epoch': epoch,
             'model': model,
             'optimizer': optimizer,
             'scheduler': scheduler}
    if scheduler is None:
        filename = 'checkpoint_ssd300_' + base_type + '.pth.tar'
    else:
        filename = 'checkpoint_ssd300_' + base_type + '_scheduler.pth.tar'
    torch.save(state, filename)


def train_SSD(base_type, lr_type):
    """
    Training.
    """
    global start_epoch, label_map, epoch, checkpoint, decay_lr_at

    # Custom dataloaders
    train_dataset = PascalVOCDataset(data_folder,
                                     split='train',
                                     keep_difficult=keep_difficult)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                               collate_fn=train_dataset.collate_fn, num_workers=workers,
                                               pin_memory=True)  # note that we're passing the collate function here

    if lr_type == 'original_scheduler':
        lr = 1e-3  # learning rate
        decay_lr_at = [10000, 12500]  # [80000, 100000] # decay learning rate after these many iterations
        decay_lr_to = 0.1  # decay learning rate to this fraction of the existing learning rate
    elif lr_type == 'pytorch_scheduler':
        lr = 1e-3  # learning rate
    else:
        raise NotImplementedError

    # Calculate total number of epochs to train and the epochs to decay learning rate at (i.e. convert iterations to epochs)
    # To convert iterations to epochs, divide iterations by the number of iterations per epoch
    # The original paper trains for 120,000 iterations with a batch size of 32, decays after 80,000 and 100,000 iterations
    epochs = iterations // (len(train_dataset) // batch_size)
    print("Number of iterations", iterations)
    print("Dataset length", len(train_dataset))
    print("batch size", batch_size)
    print("Number of Epochs to train:", epochs)
    if lr_type == 'original_scheduler':
        decay_lr_at = [it // (len(train_dataset) // batch_size) for it in decay_lr_at]
        print("Epochs to decay learning rate:", decay_lr_at)

    # Initialize model or load checkpoint
    if checkpoint is None:
        start_epoch = 0
        model = SSD300(n_classes=n_classes, base_type=base_type)
        # Initialize the optimizer, with twice the default learning rate for biases, as in the original Caffe repo
        biases = list()
        not_biases = list()
        for param_name, param in model.named_parameters():
            if param.requires_grad:
                if param_name.endswith('.bias'):
                    biases.append(param)
                else:
                    not_biases.append(param)
        optimizer = torch.optim.SGD(params=[{'params': biases, 'lr': 2 * lr}, {'params': not_biases}],
                                    lr=lr, momentum=momentum, weight_decay=weight_decay)
        if lr_type == 'pytorch_scheduler':
            # TODO: Create new scheduler
            # step_size must be an integer number of epochs, otherwise the decay never triggers
            scheduler = lr_scheduler.StepLR(optimizer, step_size=int(epochs * 0.6), gamma=0.1)
    else:
        checkpoint = torch.load(checkpoint)
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
        if lr_type == 'pytorch_scheduler':
            # TODO: Load scheduler
            scheduler = checkpoint['scheduler']

    # Move to default device
    model = model.to(device)
    criterion = MultiBoxLoss(priors_cxcy=model.priors_cxcy).to(device)

    # Epochs
    for epoch in range(start_epoch, epochs):
        # Decay learning rate at particular epochs
        if lr_type == 'original_scheduler':
            if epoch in decay_lr_at:
                adjust_learning_rate(optimizer, decay_lr_to)

        # One epoch's training
        start_epoch_time = time.time()
        train(train_loader=train_loader,
              model=model,
              criterion=criterion,
              optimizer=optimizer,
              epoch=epoch)
        end_epoch_time = time.time()
        print("One epoch time elapsed:", end_epoch_time - start_epoch_time)

        # TODO: Update the learning rate
        if lr_type == 'pytorch_scheduler':
            scheduler.step()

        # Save checkpoint
        if lr_type == 'original_scheduler':
            save_checkpoint(epoch, model, optimizer, base_type, scheduler=None)
        else:
            # TODO: Call save_checkpoint with your scheduler
            save_checkpoint(epoch, model, optimizer, base_type, scheduler=scheduler)


def train(train_loader, model, criterion, optimizer, epoch):
    """
    One epoch's training.

    :param train_loader: DataLoader for training data
    :param model: model
    :param criterion: MultiBox loss
    :param optimizer: optimizer
    :param epoch: epoch number
    """
    model.train()  # training mode enables dropout

    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    losses = AverageMeter()  # loss

    start = time.time()

    # Batches
    for i, (images, boxes, labels, _) in enumerate(train_loader):
        data_time.update(time.time() - start)

        # Move to default device
        images = images.to(device)  # (batch_size (N), 3, 300, 300)
        boxes = [b.to(device) for b in boxes]
        labels = [l.to(device) for l in labels]

        # Forward prop.
        predicted_locs, predicted_scores = model(images)  # (N, 8732, 4), (N, 8732, n_classes)

        # Loss
        loss = criterion(predicted_locs, predicted_scores, boxes, labels)  # scalar

        # Backward prop.
        optimizer.zero_grad()
        loss.backward()

        # Clip gradients, if necessary
        if grad_clip is not None:
            clip_gradient(optimizer, grad_clip)

        # Update model
        optimizer.step()

        losses.update(loss.item(), images.size(0))
        batch_time.update(time.time() - start)

        start = time.time()

        # Print status
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(epoch, i, len(train_loader),
                                                                  batch_time=batch_time,
                                                                  data_time=data_time, loss=losses))
    del predicted_locs, predicted_scores, images, boxes, labels  # free some memory since their histories may be stored
```

## Training SSD300 with VGG and the original learning rate adjuster

This can be run without making any changes to the code.
```python
start_time = time.time()
train_SSD(base_type='VGG', lr_type='original_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
```

***I am running out of time, so I will reduce the number of iterations, and therefore the number of epochs, for training***
```python
iterations = iterations // 4
iterations
```

## Training SSD300 with ResNet and the original learning rate adjuster

This should be run after implementing the ResNet base.
```python
start_time = time.time()
train_SSD(base_type='ResNet', lr_type='original_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
```

## Training SSD300 with VGG and using a PyTorch learning rate scheduler

This should be run after modifying the training loop to use a learning rate scheduler.
```python
start_time = time.time()
train_SSD(base_type='VGG', lr_type='pytorch_scheduler')
end_time = time.time()
print("time elapsed:", end_time - start_time)
```

# Testing

Now let's run the eval code; it should take about 30 minutes per model.
```python
from utils import *
# from datasets import PascalVOCDataset
from tqdm import tqdm
from pprint import PrettyPrinter

# Good formatting when printing the APs for each class and mAP
pp = PrettyPrinter()

# Parameters
data_folder = './'
keep_difficult = True  # difficult ground truth objects must always be considered in mAP calculation, because these objects DO exist!
batch_size = 64
workers = 4
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint = './checkpoint_ssd300_VGG.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
test_dataset = PascalVOCDataset(data_folder,
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)


def evaluate(test_loader, model):
    """
    Evaluate.

    :param test_loader: DataLoader for test data
    :param model: model
    """
    # Make sure it's in eval mode
    model.eval()

    # Lists to store detected and true boxes, labels, scores
    det_boxes = list()
    det_labels = list()
    det_scores = list()
    true_boxes = list()
    true_labels = list()
    true_difficulties = list()  # it is necessary to know which objects are 'difficult', see 'calculate_mAP' in utils.py

    with torch.no_grad():
        # Batches
        for i, (images, boxes, labels, difficulties) in enumerate(tqdm(test_loader, desc='Evaluating')):
            images = images.to(device)  # (N, 3, 300, 300)

            # Forward prop.
            predicted_locs, predicted_scores = model(images)

            # Detect objects in SSD output
            det_boxes_batch, det_labels_batch, det_scores_batch = model.detect_objects(predicted_locs, predicted_scores,
                                                                                       min_score=0.01, max_overlap=0.45,
                                                                                       top_k=200)
            # Evaluation MUST be at min_score=0.01, max_overlap=0.45, top_k=200
            # for fair comparison with the paper's results and other repos

            # Store this batch's results for mAP calculation
            boxes = [b.to(device) for b in boxes]
            labels = [l.to(device) for l in labels]
            difficulties = [d.to(device) for d in difficulties]

            det_boxes.extend(det_boxes_batch)
            det_labels.extend(det_labels_batch)
            det_scores.extend(det_scores_batch)
            true_boxes.extend(boxes)
            true_labels.extend(labels)
            true_difficulties.extend(difficulties)

        # Calculate mAP
        APs, mAP = calculate_mAP(det_boxes, det_labels, det_scores, true_boxes, true_labels, true_difficulties)

    # Print AP for each class
    pp.pprint(APs)

    print('\nMean Average Precision (mAP): %.3f' % mAP)
```

## Testing SSD300 with VGG and the original learning rate adjuster

Your model should output an mAP about the same as this:
Mean Average Precision (mAP): 0.589
```python
checkpoint = './checkpoint_ssd300_VGG.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
test_dataset = PascalVOCDataset(data_folder,
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)

evaluate(test_loader, model)
```

## Testing SSD300 with ResNet and the original learning rate adjuster

```python
checkpoint = './checkpoint_ssd300_ResNet.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
test_dataset = PascalVOCDataset(data_folder,
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)

evaluate(test_loader, model)
```

## Testing SSD300 with VGG and using a PyTorch learning rate scheduler

```python
checkpoint = './checkpoint_ssd300_VGG_scheduler.pth.tar'

# Load model checkpoint that is to be evaluated
checkpoint = torch.load(checkpoint)
model = checkpoint['model']
model = model.to(device)

# Switch to eval mode
model.eval()

# Load test data
test_dataset = PascalVOCDataset(data_folder,
                                split='test',
                                keep_difficult=keep_difficult)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False,
                                          collate_fn=test_dataset.collate_fn, num_workers=workers, pin_memory=True)

evaluate(test_loader, model)
```

# Viewing results

And lastly, let's view some images with our detections!
```python
!ls
```

```python
from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load model checkpoint
checkpoint = 'checkpoint_ssd300_VGG.pth.tar'
checkpoint = torch.load(checkpoint)
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
model = model.to(device)
model.eval()

# Transforms
resize = transforms.Resize((300, 300))
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])


def detect(original_image, min_score, max_overlap, top_k, suppress=None):
    """
    Detect objects in an image with a trained SSD300, and visualize the results.

    :param original_image: image, a PIL Image
    :param min_score: minimum threshold for a detected box to be considered a match for a certain class
    :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
    :param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
    :param suppress: classes that you know for sure cannot be in the image or you do not want in the image, a list
    :return: annotated image, a PIL Image
    """
    # Transform
    image = normalize(to_tensor(resize(original_image)))

    # Move to default device
    image = image.to(device)

    # Forward prop.
    predicted_locs, predicted_scores = model(image.unsqueeze(0))

    # Detect objects in SSD output
    det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,
                                                             max_overlap=max_overlap, top_k=top_k)

    # Move detections to the CPU
    det_boxes = det_boxes[0].to('cpu')

    # Transform to original image dimensions
    original_dims = torch.FloatTensor(
        [original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)
    det_boxes = det_boxes * original_dims

    # Decode class integer labels
    det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]

    # If no objects found, the detected labels will be set to ['0.'], i.e. ['background'] in SSD300.detect_objects() in model.py
    if det_labels == ['background']:
        # Just return original image
        return original_image

    # Annotate
    annotated_image = original_image
    draw = ImageDraw.Draw(annotated_image)
    font = ImageFont.load_default()  # ImageFont.truetype("./calibril.ttf", 15)

    # Suppress specific classes, if needed
    for i in range(det_boxes.size(0)):
        if suppress is not None:
            if det_labels[i] in suppress:
                continue

        # Boxes
        box_location = det_boxes[i].tolist()
        draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])
        draw.rectangle(xy=[l + 1. for l in box_location], outline=label_color_map[
            det_labels[i]])  # a second rectangle at an offset of 1 pixel to increase line thickness
        # draw.rectangle(xy=[l + 2. for l in box_location], outline=label_color_map[
        #     det_labels[i]])  # a third rectangle at an offset of 2 pixels to increase line thickness
        # draw.rectangle(xy=[l + 3. for l in box_location], outline=label_color_map[
        #     det_labels[i]])  # a fourth rectangle at an offset of 3 pixels to increase line thickness

        # Text
        text_size = font.getsize(det_labels[i].upper())
        text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
        textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
                            box_location[1]]
        draw.rectangle(xy=textbox_location, fill=label_color_map[det_labels[i]])
        draw.text(xy=text_location, text=det_labels[i].upper(), fill='white', font=font)
    del draw

    return annotated_image


relevant_images = [
    '000012.jpg',  # Car
    '000014.jpg',  # Car, Bus
    '000026.jpg',  # Car
    '000038.jpg',  # Cyclist
    '000054.jpg',  # Bus
    '000091.jpg',  # Vehicles parked, far from camera
    '000111.jpg',  # Cyclists in race, far from camera
    '000129.jpg'   # Cyclists in race, close to camera
]

for rel_img_file_name in relevant_images:
    img_path = '/content/gdrive/MyDrive/Colab Notebooks/ece495_assignment4/VOCdevkit/VOC2007/JPEGImages/' + rel_img_file_name
    original_image = Image.open(img_path, mode='r')
    original_image = original_image.convert('RGB')
    img = detect(original_image, min_score=0.2, max_overlap=0.5, top_k=200)
    fig = plt.figure(figsize=(10, 10))
    ax1 = fig.add_subplot(1, 1, 1)
    ax1.imshow(img)
```